akw27@boulderdash.cl.cam.ac.uk
akw27@labyrinth.cl.cam.ac.uk
+akw27@plucky.localdomain
iap10@labyrinth.cl.cam.ac.uk
kaf24@labyrinth.cl.cam.ac.uk
kaf24@plym.cl.cam.ac.uk
{
net_vif_t *new_vif;
net_ring_t *new_ring;
+ net_shadow_ring_t *shadow_ring;
struct task_struct *dom_task;
/* NOTE(review): diff context appears elided after this check -- the full
 * source presumably returns NULL when the domain lookup fails; as shown,
 * dom_task would be dereferenced below on failure.  TODO confirm. */
if ( !(dom_task = find_domain_by_id(domain)) )
new_ring = dom_task->net_ring_base + dom_task->num_net_vifs;
memset(new_ring, 0, sizeof(net_ring_t));
+ // allocate the shadow ring.
+ // maybe these should be kmem_cache instead of kmalloc?
+
+ shadow_ring = kmalloc(sizeof(net_shadow_ring_t), GFP_KERNEL);
+ if (shadow_ring == NULL) goto fail;
+
+ shadow_ring->tx_ring = kmalloc(TX_RING_SIZE
+ * sizeof(tx_shadow_entry_t), GFP_KERNEL);
+ shadow_ring->rx_ring = kmalloc(RX_RING_SIZE
+ * sizeof(rx_shadow_entry_t), GFP_KERNEL);
/* NOTE(review): LEAK -- if only one of the two allocations above fails,
 * the other one and shadow_ring itself are never freed: the fail path
 * below only printk()s and returns.  kfree() partial allocations first. */
+ if ((shadow_ring->tx_ring == NULL) || (shadow_ring->rx_ring == NULL))
+ goto fail;
+
+ shadow_ring->rx_prod = 0;
+
+ // fill in the new vif struct.
+
/* NOTE(review): no visible allocation of new_vif precedes this use --
 * presumably it comes from net_vif_cache in elided context lines; confirm
 * against the full source. */
new_vif->net_ring = new_ring;
+ new_vif->shadow_ring = shadow_ring;
+
+
skb_queue_head_init(&new_vif->skb_list);
new_vif->domain = domain;
dom_task->num_net_vifs++;
return new_vif;
+
+fail:
+ printk("VIF allocation failed!\n");
+ return NULL;
}
/* delete_net_vif - Delete the last vif in the given domain.
write_lock(&sys_vif_lock);
sys_vif_list[p->net_vif_list[i]->id] = NULL; // system vif list not gc'ed
write_unlock(&sys_vif_lock);
-
+
/* Free the shadow ring's descriptor arrays before the shadow ring, and the
 * shadow ring before the vif struct that owns it (members first, container
 * last) -- mirrors the allocation order in vif creation. */
+ kfree(p->net_vif_list[i]->shadow_ring->tx_ring);
+ kfree(p->net_vif_list[i]->shadow_ring->rx_ring);
+ kfree(p->net_vif_list[i]->shadow_ring);
kmem_cache_free(net_vif_cache, p->net_vif_list[i]);
}
#include <linux/types.h>
/* Guest-visible TX descriptor.  The guest fills in the buffer address and
 * length; the hypervisor records the outcome in 'status'.
 * (Resolved from mangled diff residue: the span contained both the old and
 * the new field lines; this is the post-change definition.) */
typedef struct tx_entry_st {
    unsigned long addr;   /* virtual address of the packet buffer */
    unsigned long size;   /* buffer length in bytes */
    int           status; /* per-descriptor status (RING_STATUS_*) */
} tx_entry_t;
/* Guest-visible RX descriptor.  The guest posts an empty buffer; the
 * hypervisor fills it and records the outcome in 'status'.
 * (Resolved from mangled diff residue: the span contained both the old and
 * the new field lines; this is the post-change definition.) */
typedef struct rx_entry_st {
    unsigned long addr;   /* virtual address of the receive buffer */
    unsigned long size;   /* buffer length in bytes */
    int           status; /* per-descriptor status (RING_STATUS_*) */
} rx_entry_t;
#define TX_RING_SIZE 1024
/* Drop a new rule down to the network tables. */
int add_net_rule(net_rule_t *rule);
+
/*
 * Per-descriptor status values, written into the 'status' field of ring
 * descriptors and their shadow copies.
 */
#define RING_STATUS_OK       0   /* Everything is gravy. */
#define RING_STATUS_ERR_CFU -1   /* copy_from_user() failed for this descriptor. */
+
#endif
#include <hypervisor-ifs/network.h>
#include <xeno/skbuff.h>
/*
 * Shadow ring structures are used to protect the descriptors from
 * tampering after they have been passed to the hypervisor.
 *
 * TX_RING_SIZE and RX_RING_SIZE are defined in the shared network.h.
 */
+
/* Hypervisor-private shadow of a guest TX descriptor.  The first three
 * fields appear to mirror tx_entry_t so the guest-visible part can be
 * copied as a prefix (TODO confirm -- the analogous assumption is stated
 * explicitly for the RX side); flush_count is hypervisor-only bookkeeping. */
typedef struct tx_shadow_entry_st {
    unsigned long addr;        /* buffer virtual address (copied from guest) */
    unsigned long size;        /* buffer length in bytes */
    int           status;      /* RING_STATUS_* result code */
    unsigned long flush_count; /* hypervisor-only; semantics TBD in next revision */
} tx_shadow_entry_t;
+
/* Hypervisor-private shadow of a guest RX descriptor.  Laid out as an
 * extension of the guest-visible rx entry -- extra fields are tacked on
 * after addr/size/status so the guest-visible part can be copied as a
 * prefix; flush_count is hypervisor-only bookkeeping. */
typedef struct rx_shadow_entry_st {
    unsigned long addr;        /* buffer virtual address (copied from guest) */
    unsigned long size;        /* buffer length in bytes */
    int           status;      /* RING_STATUS_* result code */
    unsigned long flush_count; /* hypervisor-only; semantics TBD in next revision */
} rx_shadow_entry_t;
+
+typedef struct net_shadow_ring_st {
+ tx_shadow_entry_t *tx_ring;
+ rx_shadow_entry_t *rx_ring;
+ unsigned int rx_prod; // trying to add shadow pointers only as I need to.
+} net_shadow_ring_t;
+
typedef struct net_vif_st {
- net_ring_t *net_ring;
- int id;
+ net_ring_t *net_ring;
+ net_shadow_ring_t *shadow_ring;
+ int id;
struct sk_buff_head skb_list;
unsigned int domain;
// rules table goes here in next revision.
void add_default_net_rule(int vif_id, u32 ipaddr);
int net_get_target_vif(struct sk_buff *skb);
void add_default_net_rule(int vif_id, u32 ipaddr);
+
/* Status fields per-descriptor: see the RING_STATUS_* definitions. */
+
+
struct sk_buff *skb;
shared_info_t *s = current->shared_info;
net_ring_t *net_ring;
+ net_shadow_ring_t *shadow_ring;
unsigned int i, nvif;
- rx_entry_t rx;
-
+ rx_shadow_entry_t *rx;
+
/* I have changed this to batch flush all vifs for a guest
* at once, whenever this is called. Since the guest is about to be
* scheduled and issued an RX interrupt for one nic, it might as well
for (nvif = 0; nvif < current->num_net_vifs; nvif++)
{
net_ring = current->net_vif_list[nvif]->net_ring;
+ shadow_ring = current->net_vif_list[nvif]->shadow_ring;
/* NOTE(review): '¤t' below is mojibake for '&current' (a mis-decoded
 * '&curren;' HTML entity) -- repair when re-applying this change. */
while ( (skb = skb_dequeue(¤t->net_vif_list[nvif]->skb_list))
!= NULL )
{
i = net_ring->rx_cons;
if ( i != net_ring->rx_prod )
{
- if ( !copy_from_user(&rx, net_ring->rx_ring+i, sizeof(rx)) )
+ if ( shadow_ring->rx_ring[i].status == RING_STATUS_OK)
{
- if ( (skb->len + ETH_HLEN) < rx.size )
- rx.size = skb->len + ETH_HLEN;
- copy_to_user((void *)rx.addr, skb->mac.raw, rx.size);
- copy_to_user(net_ring->rx_ring+i, &rx, sizeof(rx));
+ rx = shadow_ring->rx_ring+i;
+ if ( (skb->len + ETH_HLEN) < rx->size )
+ rx->size = skb->len + ETH_HLEN;
+ copy_to_user((void *)rx->addr, skb->mac.raw, rx->size);
/* NOTE(review): BUG -- 'rx' is now a pointer (it was a rx_entry_t value
 * before this change), so sizeof(rx) is the size of the POINTER, not of
 * the descriptor: only 4/8 bytes reach the guest ring.  This should be
 * sizeof(rx_entry_t), i.e. the guest-visible prefix of the shadow entry. */
+ copy_to_user(net_ring->rx_ring+i, rx, sizeof(rx));
}
net_ring->rx_cons = (i+1) & (RX_RING_SIZE-1);
if ( net_ring->rx_cons == net_ring->rx_event )
long do_net_update(void)
{
shared_info_t *shared = current->shared_info;
- net_ring_t *net_ring = current->net_ring_base;
+ net_ring_t *net_ring;
+ net_shadow_ring_t *shadow_ring;
net_vif_t *current_vif;
unsigned int i, j;
struct sk_buff *skb;
/* NOTE(review): diff context is elided here (the 'tx' declaration and the
 * loop header over j are missing) -- the lines below are a fragment of the
 * per-vif loop body. */
{
current_vif = current->net_vif_list[j];
net_ring = current_vif->net_ring;
+
+ /* First, we send out pending TX descriptors if they exist on this ring.
+ */
+
for ( i = net_ring->tx_cons; i != net_ring->tx_prod; i = TX_RING_INC(i) )
{
if ( copy_from_user(&tx, net_ring->tx_ring+i, sizeof(tx)) )
}
}
net_ring->tx_cons = i;
+
+ /* Next, pull any new RX descriptors across to the shadow ring.
+ * Note that in the next revision, these will reference PTEs and the
+ * code here will have to validate reference and flush counts, copy the
+ * descriptor, change the ownership to dom0 and invalidate the client's
+ * version of the page.
+ */
+
+ shadow_ring = current_vif->shadow_ring;
+
/* NOTE(review): this loop walks the RX ring but advances the index with
 * TX_RING_INC -- correct only if TX_RING_SIZE == RX_RING_SIZE; it should
 * presumably be RX_RING_INC.  Also net_ring->rx_prod is guest-writable
 * shared memory and is re-read on every iteration -- TODO confirm it is
 * snapshotted/bounded, otherwise a guest can race this loop. */
+ for (i = shadow_ring->rx_prod; i != net_ring->rx_prod; i = TX_RING_INC(i))
+ {
+ /* This copy assumes that rx_shadow_entry_t is an extension of rx_net_entry_t
+ * extra fields must be tacked on to the end.
+ */
+
+ if ( copy_from_user( shadow_ring->rx_ring+i, net_ring->rx_ring+i,
+ sizeof (rx_entry_t) ) )
+ {
+ shadow_ring->rx_ring[i].status = RING_STATUS_ERR_CFU;
+ } else {
+ shadow_ring->rx_ring[i].status = RING_STATUS_OK;
+ }
+ }
+
+ shadow_ring->rx_prod = net_ring->rx_prod;
}
return 0;